In [1]:
import keras
from keras import layers
import numpy as np


Using TensorFlow backend.

In [2]:
latent_dim = 32
height = 32
width = 32
channels = 3

In [3]:
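# The generator maps a latent vector of shape (latent_dim,) to a 32x32 RGB
# image: a Dense layer expands the vector into a 16x16x128 feature map, a
# single strided Conv2DTranspose upsamples it to 32x32, and a final tanh
# Conv2D produces pixel values in [-1, 1].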
generator_input = keras.Input(shape=(latent_dim, ))

x = layers.Dense(128 * 16 * 16)(generator_input)
x = layers.LeakyReLU()(x)
x = layers.Reshape((16, 16, 128))(x)

x = layers.Conv2D(256, 5, padding='same')(x)
x = layers.LeakyReLU()(x)

x = layers.Conv2DTranspose(256, 4, strides=2, padding='same')(x)
x = layers.LeakyReLU()(x)

x = layers.Conv2D(256, 5, padding='same')(x)
x = layers.LeakyReLU()(x)

x = layers.Conv2D(256, 5, padding='same')(x)
x = layers.LeakyReLU()(x)

x = layers.Conv2D(channels, 7, activation='tanh', padding='same')(x)

generator = keras.models.Model(generator_input, x)
generator.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_1 (InputLayer)         (None, 32)                0         
_________________________________________________________________
dense_1 (Dense)              (None, 32768)             1081344   
_________________________________________________________________
leaky_re_lu_1 (LeakyReLU)    (None, 32768)             0         
_________________________________________________________________
reshape_1 (Reshape)          (None, 16, 16, 128)       0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 16, 16, 256)       819456    
_________________________________________________________________
leaky_re_lu_2 (LeakyReLU)    (None, 16, 16, 256)       0         
_________________________________________________________________
conv2d_transpose_1 (Conv2DTr (None, 32, 32, 256)       1048832   
_________________________________________________________________
leaky_re_lu_3 (LeakyReLU)    (None, 32, 32, 256)       0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 32, 32, 256)       1638656   
_________________________________________________________________
leaky_re_lu_4 (LeakyReLU)    (None, 32, 32, 256)       0         
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 32, 32, 256)       1638656   
_________________________________________________________________
leaky_re_lu_5 (LeakyReLU)    (None, 32, 32, 256)       0         
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 32, 32, 3)         37635     
=================================================================
Total params: 6,264,579
Trainable params: 6,264,579
Non-trainable params: 0
_________________________________________________________________
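
As a quick sanity check (a hypothetical addition, not part of the original run), random latent vectors fed through the untrained generator should already come out with the target image shape:

# Hypothetical check: the generator maps (batch, latent_dim) noise to
# (batch, 32, 32, 3) images bounded by the final tanh activation.
noise = np.random.normal(size=(4, latent_dim))
fake = generator.predict(noise)
print(fake.shape)              # (4, 32, 32, 3)
print(fake.min(), fake.max())  # values within [-1, 1]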

In [4]:
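# The discriminator maps a 32x32 RGB image to a single sigmoid probability
# (real vs. fake). Strided convolutions downsample instead of pooling, and
# the Dropout layer injects noise that helps keep GAN training from
# getting stuck.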
discriminator_input = keras.Input(shape=(height, width, channels))

x = layers.Conv2D(128, 3)(discriminator_input)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(128, 4, strides=2)(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(128, 4, strides=2)(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(128, 4, strides=2)(x)
x = layers.LeakyReLU()(x)
x = layers.Flatten()(x)
x = layers.Dropout(0.4)(x)
x = layers.Dense(1, activation='sigmoid')(x)

discriminator = keras.models.Model(discriminator_input, x)
discriminator.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_2 (InputLayer)         (None, 32, 32, 3)         0         
_________________________________________________________________
conv2d_5 (Conv2D)            (None, 30, 30, 128)       3584      
_________________________________________________________________
leaky_re_lu_6 (LeakyReLU)    (None, 30, 30, 128)       0         
_________________________________________________________________
conv2d_6 (Conv2D)            (None, 14, 14, 128)       262272    
_________________________________________________________________
leaky_re_lu_7 (LeakyReLU)    (None, 14, 14, 128)       0         
_________________________________________________________________
conv2d_7 (Conv2D)            (None, 6, 6, 128)         262272    
_________________________________________________________________
leaky_re_lu_8 (LeakyReLU)    (None, 6, 6, 128)         0         
_________________________________________________________________
conv2d_8 (Conv2D)            (None, 2, 2, 128)         262272    
_________________________________________________________________
leaky_re_lu_9 (LeakyReLU)    (None, 2, 2, 128)         0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 512)               0         
_________________________________________________________________
dropout_1 (Dropout)          (None, 512)               0         
_________________________________________________________________
dense_2 (Dense)              (None, 1)                 513       
=================================================================
Total params: 790,913
Trainable params: 790,913
Non-trainable params: 0
_________________________________________________________________
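
Likewise (another hypothetical check), the discriminator should reduce a batch of images to one probability each; reusing the `fake` batch from the sketch above:

scores = discriminator.predict(fake)
print(scores.shape)  # (4, 1) -- one real-vs-fake probability per image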

In [5]:
discriminator_optimizer = keras.optimizers.RMSprop(
    lr=0.0008,
    clipvalue=1.0,
    decay=1e-8)
discriminator.compile(optimizer=discriminator_optimizer, loss='binary_crossentropy')
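
Two stabilization tricks in this optimizer are worth noting: gradient-value clipping (clipvalue=1.0) bounds each weight update, and the small learning-rate decay (decay=1e-8) gradually slows training; both help with the non-stationary losses typical of GANs.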

In [6]:
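# Freeze the discriminator's weights inside the combined model: when `gan`
# is trained, only the generator should be updated. Because `discriminator`
# was compiled before this flag changed, Keras will print a "Discrepancy
# between trainable weights..." UserWarning during training; in this setup
# the warning is expected and harmless (the standalone discriminator stays
# trainable).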
discriminator.trainable = False

gan_input = keras.Input(shape=(latent_dim, ))
gan_output = discriminator(generator(gan_input))
gan = keras.models.Model(gan_input, gan_output)

In [7]:
gan_optimizer = keras.optimizers.RMSprop(lr=0.0004, clipvalue=1.0, decay=1e-8)
gan.compile(optimizer=gan_optimizer, loss='binary_crossentropy')
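
To confirm the freeze took effect (a hypothetical check, not in the original run), the combined model should expose only the generator's trainable weights:

print(len(gan.trainable_weights))        # same count as the line below
print(len(generator.trainable_weights))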

Train our DCGAN


In [8]:
import os
from keras.preprocessing import image

(x_train, y_train), (_, _) = keras.datasets.cifar10.load_data()

In [9]:
x_train = x_train[y_train.flatten() == 8] # keep only the ship images (CIFAR-10 class 8)
x_train = x_train.reshape(
    (x_train.shape[0], ) + 
    (height, width, channels)).astype('float32') / 255. # normalize pixel values to [0, 1]
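
CIFAR-10 has 5,000 training images per class, so the filtered array should hold exactly the ship subset (a hypothetical check):

print(x_train.shape)  # (5000, 32, 32, 3)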

In [10]:
iterations = 10000
batch_size = 20
save_dir = "E:\\temp\\dcgan"
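
Each training step below does two updates: first the discriminator is trained on a half-fake, half-real batch with honest labels; then the GAN, whose discriminator is frozen, is trained on pure noise with "misleading" all-real labels, which pushes gradients into the generator so it learns to fool the discriminator.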

In [11]:
start = 0

for step in range(iterations):
    # sample random points in the latent space
    random_latent_vectors = np.random.normal(size=(batch_size, latent_dim))
    
    # generate fake images
    generated_images = generator.predict(random_latent_vectors)
    
    # get real images
    stop = start + batch_size
    real_images = x_train[start: stop]

    # create training data for discriminator
    combined_images = np.concatenate([generated_images, real_images]) 
    # labels: 1 => fake, 0 => real
    labels = np.concatenate([np.ones((batch_size, 1)), np.zeros((batch_size, 1))])
    
    # train discriminator
    d_loss = discriminator.train_on_batch(combined_images, labels)
    
    # sample fresh random points in the latent space
    random_latent_vectors = np.random.normal(size=(batch_size, latent_dim))
    
    # misleading targets: label the generated images as 'real' (0) so the
    # generator is rewarded for fooling the frozen discriminator
    misleading_targets = np.zeros((batch_size, 1))
    
    # train generator
    a_loss = gan.train_on_batch(random_latent_vectors, misleading_targets)
    
    start += batch_size
    if start > len(x_train) - batch_size:
        start = 0
    
    if step % 100 == 0:
        gan.save_weights('gan.h5')
        
        print("discriminator loss: ", d_loss)
        print("adversarial loss: ", a_loss)
        
        img = image.array_to_img(generated_images[0] * 255., scale=False)
        img.save(os.path.join(save_dir, 'generated_ship' + str(step) + '.png'))
        
        img = image.array_to_img(real_images[0] * 255., scale=False)
        img.save(os.path.join(save_dir, 'real_ship' + str(step) + '.png'))


E:\Miniconda3\envs\env3-gpu\lib\site-packages\keras\engine\training.py:973: UserWarning: Discrepancy between trainable weights and collected trainable weights, did you set `model.trainable` without calling `model.compile` after ?
  'Discrepancy between trainable weights and collected trainable'
discriminator loss:  0.667364
adversarial loss:  0.65996
discriminator loss:  0.310827
adversarial loss:  2.94974
discriminator loss:  0.783311
adversarial loss:  0.780401
discriminator loss:  0.694457
adversarial loss:  0.684183
discriminator loss:  0.698418
adversarial loss:  0.696844
discriminator loss:  0.692233
adversarial loss:  0.729003
discriminator loss:  0.704063
adversarial loss:  0.681386
discriminator loss:  0.70218
adversarial loss:  0.728991
discriminator loss:  0.700803
adversarial loss:  0.703575
discriminator loss:  0.708845
adversarial loss:  0.698349
discriminator loss:  0.694344
adversarial loss:  0.757346
discriminator loss:  0.692718
adversarial loss:  0.707597
discriminator loss:  0.696908
adversarial loss:  0.693149
discriminator loss:  0.6818
adversarial loss:  0.725513
discriminator loss:  0.696163
adversarial loss:  0.764356
discriminator loss:  0.695506
adversarial loss:  0.687942
discriminator loss:  0.691523
adversarial loss:  0.723755
discriminator loss:  0.832895
adversarial loss:  0.873291
discriminator loss:  0.676669
adversarial loss:  0.678802
discriminator loss:  0.721071
adversarial loss:  0.775117
discriminator loss:  0.690303
adversarial loss:  0.682089
discriminator loss:  0.689406
adversarial loss:  0.721151
discriminator loss:  0.690112
adversarial loss:  0.746673
discriminator loss:  0.6971
adversarial loss:  0.743582
discriminator loss:  0.692515
adversarial loss:  0.743617
discriminator loss:  0.690871
adversarial loss:  0.679364
discriminator loss:  0.695154
adversarial loss:  0.743896
discriminator loss:  0.690279
adversarial loss:  0.711702
discriminator loss:  0.693325
adversarial loss:  0.65518
discriminator loss:  0.692047
adversarial loss:  0.738814
discriminator loss:  0.681264
adversarial loss:  0.787941
discriminator loss:  0.70321
adversarial loss:  0.705074
discriminator loss:  0.723223
adversarial loss:  0.6643
discriminator loss:  0.692098
adversarial loss:  0.839337
discriminator loss:  0.696735
adversarial loss:  0.703243
discriminator loss:  0.691811
adversarial loss:  0.714847
discriminator loss:  0.678181
adversarial loss:  0.760296
discriminator loss:  0.698291
adversarial loss:  0.713387
discriminator loss:  0.700002
adversarial loss:  0.692998
discriminator loss:  0.691213
adversarial loss:  0.732574
discriminator loss:  0.690969
adversarial loss:  0.677612
discriminator loss:  0.683808
adversarial loss:  0.622647
discriminator loss:  0.67719
adversarial loss:  0.729554
discriminator loss:  0.680941
adversarial loss:  0.728254
discriminator loss:  0.708413
adversarial loss:  0.739307
discriminator loss:  0.758943
adversarial loss:  0.8466
discriminator loss:  0.681582
adversarial loss:  0.703127
discriminator loss:  0.68256
adversarial loss:  0.66785
discriminator loss:  0.678411
adversarial loss:  0.781454
discriminator loss:  0.681787
adversarial loss:  0.672391
discriminator loss:  0.733528
adversarial loss:  0.688703
discriminator loss:  0.679794
adversarial loss:  0.756164
discriminator loss:  0.704405
adversarial loss:  0.810244
discriminator loss:  0.692479
adversarial loss:  0.686841
discriminator loss:  0.673176
adversarial loss:  0.755301
discriminator loss:  0.696267
adversarial loss:  0.730609
discriminator loss:  0.694863
adversarial loss:  0.685084
discriminator loss:  0.671044
adversarial loss:  0.736713
discriminator loss:  0.691204
adversarial loss:  0.700035
discriminator loss:  0.694757
adversarial loss:  0.666988
discriminator loss:  0.687353
adversarial loss:  0.675255
discriminator loss:  0.696204
adversarial loss:  0.669778
discriminator loss:  0.697358
adversarial loss:  0.704635
discriminator loss:  0.683506
adversarial loss:  0.757902
discriminator loss:  0.691992
adversarial loss:  0.741452
discriminator loss:  0.690666
adversarial loss:  0.627478
discriminator loss:  0.696207
adversarial loss:  0.706278
discriminator loss:  0.704672
adversarial loss:  0.588951
discriminator loss:  0.69026
adversarial loss:  0.701475
discriminator loss:  0.699164
adversarial loss:  0.773423
discriminator loss:  0.709344
adversarial loss:  0.660534
discriminator loss:  1.30798
adversarial loss:  0.792509
discriminator loss:  0.687747
adversarial loss:  0.741277
discriminator loss:  0.705557
adversarial loss:  0.985313
discriminator loss:  0.681735
adversarial loss:  0.63674
discriminator loss:  0.693084
adversarial loss:  0.74392
discriminator loss:  0.705258
adversarial loss:  0.760666
discriminator loss:  1.24428
adversarial loss:  1.34349
discriminator loss:  0.649255
adversarial loss:  0.791957
discriminator loss:  0.669673
adversarial loss:  0.65302
discriminator loss:  0.706598
adversarial loss:  0.71287
discriminator loss:  0.707131
adversarial loss:  0.651814
discriminator loss:  0.694643
adversarial loss:  0.665882
discriminator loss:  0.693836
adversarial loss:  0.687068
discriminator loss:  0.685271
adversarial loss:  0.718548
discriminator loss:  0.672186
adversarial loss:  0.574836
discriminator loss:  0.700636
adversarial loss:  0.736094
discriminator loss:  0.693934
adversarial loss:  0.768581
discriminator loss:  0.686402
adversarial loss:  0.620225
discriminator loss:  0.68291
adversarial loss:  0.785735
discriminator loss:  0.699683
adversarial loss:  0.751179
discriminator loss:  0.691545
adversarial loss:  0.741234
discriminator loss:  0.710003
adversarial loss:  0.738212
discriminator loss:  0.693068
adversarial loss:  0.663954
discriminator loss:  0.68692
adversarial loss:  0.684967
discriminator loss:  0.707847
adversarial loss:  0.677615
discriminator loss:  0.688751
adversarial loss:  0.743023
discriminator loss:  0.699885
adversarial loss:  0.688359
discriminator loss:  0.702287
adversarial loss:  0.709547
discriminator loss:  0.717131
adversarial loss:  0.803051
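
After training, the saved weights can be reloaded to render a grid of samples. A minimal sketch, assuming matplotlib is available (the `* 255.` scaling mirrors how images are saved inside the loop above):

import matplotlib.pyplot as plt

# Reload the combined model's weights; the generator shares them.
gan.load_weights('gan.h5')
noise = np.random.normal(size=(100, latent_dim))
images = generator.predict(noise)

# Render a 10x10 grid of generated ships.
fig, axes = plt.subplots(10, 10, figsize=(10, 10))
for ax, img in zip(axes.flat, images):
    ax.imshow((img * 255.).clip(0, 255).astype('uint8'))
    ax.axis('off')
plt.show()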
